if (type > OS_BASE && type < OS_END) {
v->domain->arch.vmx_platform.gos_type = type;
gdprintk(XENLOG_INFO, "Guest OS : %s\n", guest_os_name[type - OS_BASE]);
+
+ if (GOS_WINDOWS(v)) {
+ struct xen_ia64_opt_feature optf;
+
+ /* Windows identity maps regions 4 & 5 */
+ optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG4;
+ optf.on = XEN_IA64_OPTF_ON;
+ optf.pgprot = (_PAGE_P|_PAGE_A|_PAGE_D|_PAGE_MA_WB|_PAGE_AR_RW);
+ optf.key = 0;
+ domain_opt_feature(&optf);
+
+ optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG5;
+ optf.pgprot = (_PAGE_P|_PAGE_A|_PAGE_D|_PAGE_MA_UC|_PAGE_AR_RW);
+ domain_opt_feature(&optf);
+ }
}
}
thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
} else if (type == DSIDE_TLB) {
-
+ struct opt_feature* optf = &(v->domain->arch.opt_feature);
+
if (misr.sp)
return vmx_handle_lds(regs);
itir = rr & (RR_RID_MASK | RR_PS_MASK);
if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
- if (GOS_WINDOWS(v)) {
- /* windows use region 4 and 5 for identity mapping */
- if (REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL)
- && (REGION_OFFSET(vadr) <= _PAGE_PPN_MASK)) {
-
- pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
- (_PAGE_P | _PAGE_A | _PAGE_D |
- _PAGE_MA_WB | _PAGE_AR_RW);
-
- if (thash_purge_and_insert(v, pteval, itir, vadr, type))
- goto try_again;
-
- return IA64_NO_FAULT;
- }
-
- if (REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL)
- && (REGION_OFFSET(vadr) <= _PAGE_PPN_MASK)) {
-
- pteval = PAGEALIGN(REGION_OFFSET(vadr),itir_ps(itir)) |
- (_PAGE_P | _PAGE_A | _PAGE_D |
- _PAGE_MA_UC | _PAGE_AR_RW);
-
- if (thash_purge_and_insert(v, pteval, itir, vadr, type))
- goto try_again;
-
- return IA64_NO_FAULT;
- }
+ /* windows use region 4 and 5 for identity mapping */
+ if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4 &&
+ REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
+ REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {
+
+ pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
+ optf->im_reg4.pgprot;
+ if (thash_purge_and_insert(v, pteval, itir, vadr, type))
+ goto try_again;
+ return IA64_NO_FAULT;
+ }
+ if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5 &&
+ REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
+ REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {
+
+ pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
+ optf->im_reg5.pgprot;
+ if (thash_purge_and_insert(v, pteval, itir, vadr, type))
+ goto try_again;
+ return IA64_NO_FAULT;
}
-
if (vpsr.ic) {
vcpu_set_isr(v, misr.val);
alt_dtlb(v, vadr);
}
/* avoid recursively walking (short format) VHPT */
- if (!GOS_WINDOWS(v) &&
+ if (!(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4) &&
+ !(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5) &&
(((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {
if (vpsr.ic) {
dom0_size = parse_size_and_unit(s, NULL);
}
custom_param("dom0_mem", parse_dom0_mem);
+
+/*
+ * Helper function for the optimization stuff handling the identity mapping
+ * feature.
+ *
+ * Turns one identity-mapping feature on or off: 'mask' is the domain's
+ * opt_feature.mask, 'im' the per-region cached state to fill in, and 'f'
+ * the (guest-supplied) request.
+ *
+ * NOTE: f->cmd is or'ed/cleared directly as a bit in *mask, so every
+ * XEN_IA64_OPTF_* cmd value must be a distinct single bit.
+ */
+static inline void
+optf_set_identity_mapping(unsigned long* mask, struct identity_mapping* im,
+ struct xen_ia64_opt_feature* f)
+{
+ if (f->on) {
+ *mask |= f->cmd;
+ im->pgprot = f->pgprot;
+ im->key = f->key;
+ } else {
+ /* Switching off: clear the feature bit and reset the cached state. */
+ *mask &= ~(f->cmd);
+ im->pgprot = 0;
+ im->key = 0;
+ }
+}
+
+/*
+ * Switch an optimization feature on/off for the current domain.
+ * Called from the PV __HYPERVISOR_opt_feature hypercall and from the
+ * HVM OS-type detection path.
+ * Returns 0 on success, -ENOSYS for an unknown feature.
+ */
+int
+domain_opt_feature(struct xen_ia64_opt_feature* f)
+{
+    struct opt_feature* optf = &(current->domain->arch.opt_feature);
+    int rc = 0;    /* was 'long'; match the declared int return type */
+
+    switch (f->cmd) {
+    case XEN_IA64_OPTF_IDENT_MAP_REG4:
+        optf_set_identity_mapping(&optf->mask, &optf->im_reg4, f);
+        break;
+    case XEN_IA64_OPTF_IDENT_MAP_REG5:
+        optf_set_identity_mapping(&optf->mask, &optf->im_reg5, f);
+        break;
+    case XEN_IA64_OPTF_IDENT_MAP_REG7:
+        optf_set_identity_mapping(&optf->mask, &optf->im_reg7, f);
+        break;
+    default:
+        /* f->cmd is unsigned long, so %lu (not %ld) is the correct
+         * conversion specifier. */
+        printk("%s: unknown opt_feature: %lu\n", __func__, f->cmd);
+        rc = -ENOSYS;
+        break;
+    }
+    return rc;
+}
+
regs->r10 = fpswa_ret.err1;
regs->r11 = fpswa_ret.err2;
break;
+ case __HYPERVISOR_opt_feature: {
+ XEN_GUEST_HANDLE(void) arg;
+ struct xen_ia64_opt_feature optf;
+ set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
+ if (copy_from_guest(&optf, arg, 1) == 0)
+ regs->r8 = domain_opt_feature(&optf);
+ else
+ regs->r8 = -EFAULT;
+ break;
+ }
default:
printk("unknown ia64 fw hypercall %lx\n", regs->r2);
regs->r8 = do_ni_hypercall();
vcpu_thash(vcpu, address, iha);
if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE)) {
REGS *regs = vcpu_regs(vcpu);
- // NOTE: This is specific code for linux kernel
- // We assume region 7 is identity mapped
- if (region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
+ struct opt_feature* optf = &(vcpu->domain->arch.opt_feature);
+
+ /* Optimization for identity mapped region 7 OS (linux) */
+ if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7 &&
+ region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
pte.val = address & _PAGE_PPN_MASK;
- pte.val = pte.val | pgprot_val(PAGE_KERNEL);
+ pte.val = pte.val | optf->im_reg7.pgprot;
goto out;
}
return is_data ? IA64_ALT_DATA_TLB_VECTOR :
int efi_virt_mode; /* phys : 0 , virt : 1 */
};
+/*
+ * Optimization features:
+ * used by the hypervisor to perform certain optimizations for guests.
+ * By default the optimizations are switched off and the guest has to activate
+ * the feature. On PV the guest must do this via the hypercall
+ * __HYPERVISOR_opt_feature, on HVM it's done within xen in set_os_type().
+ */
+
+/*
+ * Helper struct for the different identity mapping optimizations.
+ * The hypervisor does the insertion of address translations in the tlb
+ * for identity mapped areas without reflecting the page fault
+ * to the guest.
+ */
+struct identity_mapping {
+ unsigned long pgprot; /* The page protection bit mask of the pte. */
+ unsigned long key; /* A protection key.  NOTE(review): written by
+                     * optf_set_identity_mapping but no reader is visible
+                     * in this patch -- confirm it is consumed (itir key?). */
+};
+
+/* Central structure for optimization features used by the hypervisor. */
+struct opt_feature {
+ unsigned long mask; /* One bit per feature; the bit values are the
+                      * XEN_IA64_OPTF_* cmd values themselves. */
+ struct identity_mapping im_reg4; /* Region 4 identity mapping */
+ struct identity_mapping im_reg5; /* Region 5 identity mapping */
+ struct identity_mapping im_reg7; /* Region 7 identity mapping */
+};
+
+/*
+ * The base XEN_IA64_OPTF_IDENT_MAP_REG7 is defined in public/arch-ia64.h.
+ * Each cmd value doubles as a bit in opt_feature.mask (see
+ * optf_set_identity_mapping), so every value must be a distinct power of
+ * two.  The former "+ 1" scheme made REG5 == 0x3 == (REG7 | REG4), so
+ * enabling REG5 also set -- and testing REG5 also matched -- the REG7
+ * and REG4 bits.
+ * Identity mapping of region 4 addresses in HVM.
+ */
+#define XEN_IA64_OPTF_IDENT_MAP_REG4 (XEN_IA64_OPTF_IDENT_MAP_REG7 << 1)
+/* Identity mapping of region 5 addresses in HVM. */
+#define XEN_IA64_OPTF_IDENT_MAP_REG5 (XEN_IA64_OPTF_IDENT_MAP_REG4 << 1)
+
+/* Set an optimization feature in the struct arch_domain. */
+extern int domain_opt_feature(struct xen_ia64_opt_feature*);
+
struct arch_domain {
struct mm_struct mm;
struct last_vcpu last_vcpu[NR_CPUS];
+ struct opt_feature opt_feature;
+
#ifdef CONFIG_XEN_IA64_TLB_TRACK
struct tlb_track* tlb_track;
#endif
#define XENCOMM_INLINE_ADDR(addr) \
((unsigned long)(addr) & ~XENCOMM_INLINE_MASK)
+#ifndef __ASSEMBLY__
+
+/*
+ * Optimization features.
+ * The hypervisor may do some special optimizations for guests. This hypercall
+ * can be used to switch on/off these special optimizations.
+ */
+#define __HYPERVISOR_opt_feature 0x700UL
+
+#define XEN_IA64_OPTF_OFF 0x0
+#define XEN_IA64_OPTF_ON 0x1
+
+/*
+ * If this feature is switched on, the hypervisor inserts the
+ * tlb entries without calling the guests traphandler.
+ * This is useful in guests using region 7 for identity mapping
+ * like the linux kernel does.
+ */
+#define XEN_IA64_OPTF_IDENT_MAP_REG7 0x1UL
+
+struct xen_ia64_opt_feature {
+ unsigned long cmd; /* Which feature to switch (XEN_IA64_OPTF_*). */
+ unsigned char on; /* XEN_IA64_OPTF_ON or XEN_IA64_OPTF_OFF. */
+ /*
+  * Feature-specific arguments, kept in a union so future features can
+  * add their own argument block without changing the struct size.
+  * NOTE(review): anonymous struct/union members are a GNU extension
+  * before C11 -- confirm all guest toolchains accept this in a public
+  * ABI header.
+  */
+ union {
+ struct {
+ /* The page protection bit mask of the pte.
+ * This will be or'ed with the pte. */
+ unsigned long pgprot;
+ unsigned long key; /* A protection key for itir. */
+ };
+ };
+};
+
+#endif /* __ASSEMBLY__ */
+
/* xen perfmon */
#ifdef XEN
#ifndef __ASSEMBLY__